* 7AP. We do not assume writes to the LVT deassering IRQs
* 8AP. We do not enable low power mode (deep sleep) during MP bootup
* 9AP. We do not use mixed mode
- *
- * Pentium
- * There is a marginal case where REP MOVS on 100MHz SMP
- * machines with B stepping processors can fail. XXX should provide
- * an L1cache=Writethrough or L1cache=off option.
- *
- * B stepping CPUs may hang. There are hardware work arounds
- * for this. We warn about it in case your board doesnt have the work
- * arounds. Basically thats so I can tell anyone with a B stepping
- * CPU and SMP problems "tough".
- *
- * Specific items [From Pentium Processor Specification Update]
- *
- * 1AP. Linux doesn't use remote read
- * 2AP. Linux doesn't trust APIC errors
- * 3AP. We work around this
- * 4AP. Linux never generated 3 interrupts of the same priority
- * to cause a lost local interrupt.
- * 5AP. Remote read is never used
- * 6AP. not affected - worked around in hardware
- * 7AP. not affected - worked around in hardware
- * 8AP. worked around in hardware - we get explicit CS errors if not
- * 9AP. only 'noapic' mode affected. Might generate spurious
- * interrupts, we log only the first one and count the
- * rest silently.
- * 10AP. not affected - worked around in hardware
- * 11AP. Linux reads the APIC between writes to avoid this, as per
- * the documentation. Make sure you preserve this as it affects
- * the C stepping chips too.
- * 12AP. not affected - worked around in hardware
- * 13AP. not affected - worked around in hardware
- * 14AP. we always deassert INIT during bootup
- * 15AP. not affected - worked around in hardware
- * 16AP. not affected - worked around in hardware
- * 17AP. not affected - worked around in hardware
- * 18AP. not affected - worked around in hardware
- * 19AP. not affected - worked around in BIOS
- *
- * If this sounds worrying believe me these bugs are either ___RARE___,
- * or are signal timing bugs worked around in hardware and there's
- * about nothing of note with C stepping upwards.
*/
-/* The 'big kernel lock' */
-spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
-
-struct tlb_state cpu_tlbstate[NR_CPUS] = {[0 ... NR_CPUS-1] = { 0 }};
-
/*
* the following functions deal with sending IPIs between CPUs.
*
__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}
+/*
+ * ********* XEN NOTICE **********
+ * I've left the following comments lying around as they look like they might
+ * be useful to get multiprocessor guest OSes going. However, I suspect the
+ * issues we face will be quite different so I've ripped out all the
+ * TLBSTATE logic (I didn't understand it anyway :-). These comments do
+ * not apply to Xen, therefore! -- Keir (8th Oct 2003).
+ */
/*
* Smarter SMP flushing macros.
* c/o Linus Torvalds.
* writing to user space from interrupts. (Its not allowed anyway).
*
* Optimizations Manfred Spraul <manfred@colorfullife.com>
- */
-
-static volatile unsigned long flush_cpumask;
-#if 0
-static struct mm_struct * flush_mm;
-static unsigned long flush_va;
-#endif
-static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
-#define FLUSH_ALL 0xffffffff
-
-/*
- * We cannot call mmdrop() because we are in interrupt context,
- * instead update mm.cpu_vm_mask.
- */
-static void inline leave_mm (unsigned long cpu)
-{
- if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
- BUG();
- clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
-}
-
-/*
*
* The flush IPI assumes that a thread switch happens in this order:
* [cpu0: the cpu that switches]
*
* The good news is that cpu_tlbstate is local to each cpu, no
* write/read ordering problems.
- */
-
-/*
+ *
* TLB flush IPI:
*
* 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
* 2) Leave the mm if we are in the lazy tlb mode.
*/
+static volatile unsigned long flush_cpumask;
+static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
+#define FLUSH_ALL 0xffffffff
+
asmlinkage void smp_invalidate_interrupt (void)
{
unsigned long cpu = smp_processor_id();
if (!test_bit(cpu, &flush_cpumask))
return;
-#if 0
- if (flush_mm == cpu_tlbstate[cpu].active_mm) {
- if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
- if (flush_va == FLUSH_ALL)
-#endif
- local_flush_tlb();
-#if 0
- else
- __flush_tlb_one(flush_va);
- } else
- leave_mm(cpu);
- }
-#endif
+ local_flush_tlb();
+
ack_APIC_irq();
clear_bit(cpu, &flush_cpumask);
}
static inline void do_flush_tlb_all_local(void)
{
- unsigned long cpu = smp_processor_id();
-
__flush_tlb_all();
- if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
- leave_mm(cpu);
}
static void flush_tlb_all_ipi(void* info)
}
}
-static inline void do_process_times(struct task_struct *p,
- unsigned long user, unsigned long system)
-{
-}
-
-
-void update_one_process(struct task_struct *p, unsigned long user,
- unsigned long system, int cpu)
-{
- do_process_times(p, user, system);
-}
-
-/*
- * Called from the timer interrupt handler to charge one tick to the current
- * process. user_tick is 1 if the tick is user time, 0 for system.
- */
-void update_process_times(int user_tick)
-{
- struct task_struct *p = current;
- int cpu = smp_processor_id(), system = user_tick ^ 1;
-
- update_one_process(p, user_tick, system, cpu);
-
- if ( --p->counter <= 0 )
- {
- p->counter = 0;
- set_bit(_HYP_EVENT_NEED_RESCHED, &p->hyp_events);
- }
-}
-
-
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies;
void do_timer(struct pt_regs *regs)
{
-
(*(unsigned long *)&jiffies)++;
- if ( !using_apic_timer )
- update_process_times(user_mode(regs));
-
mark_bh(TIMER_BH);
if (TQ_ACTIVE(tq_timer))
mark_bh(TQUEUE_BH);
#include <xeno/spinlock.h>
struct mm_struct {
- unsigned long cpu_vm_mask;
/*
* Every domain has a L1 pagetable of its own. Per-domain mappings
* are put in this table (eg. the current GDT is mapped here).
extern struct mm_struct init_mm;
#define IDLE0_MM \
{ \
- cpu_vm_mask: 0, \
perdomain_pt: 0, \
pagetable: mk_pagetable(__pa(idle_pg_table)) \
}
unsigned int max_pages; /* max number of pages that can be possesed */
/* scheduling */
- struct list_head run_list; /* the run list */
+ struct list_head run_list;
int has_cpu;
- int policy;
- int counter;
- struct ac_timer blt; /* blocked timeout */
-
s_time_t lastschd; /* time this domain was last scheduled */
s_time_t cpu_time; /* total CPU time received till now */
s_time_t wokenup; /* time domain got woken up */
char name[MAX_DOMAIN_NAME];
- /*
- * active_mm stays for now. It's entangled in the tricky TLB flushing
- * stuff which I haven't addressed yet. It stays until I'm man enough
- * to venture in.
- */
- struct mm_struct *active_mm;
struct thread_struct thread;
struct task_struct *prev_task, *next_task, *next_hash;
#define TASK_SUSPENDED 8
#define TASK_DYING 16
-#define SCHED_YIELD 0x10
-
#include <asm/uaccess.h> /* for KERNEL_DS */
#define IDLE0_TASK(_t) \
avt: 0xffffffff, \
mm: IDLE0_MM, \
addr_limit: KERNEL_DS, \
- active_mm: &idle0_task.mm, \
thread: INIT_THREAD, \
prev_task: &(_t), \
next_task: &(_t) \